}
static void __init machine_specific_memory_setup(
- struct e820entry *raw, int raw_nr)
+ struct e820entry *raw, int *raw_nr)
{
- char nr = (char)raw_nr;
+ char nr = (char)*raw_nr;
sanitize_e820_map(raw, &nr);
+ *raw_nr = nr;
(void)copy_e820_map(raw, nr);
clip_4gb();
clip_mem();
}
-unsigned long __init init_e820(struct e820entry *raw, int raw_nr)
+unsigned long __init init_e820(struct e820entry *raw, int *raw_nr)
{
machine_specific_memory_setup(raw, raw_nr);
printk(KERN_INFO "Physical RAM map:\n");
* pfn_info table and allocation bitmap.
*/
static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
-#if defined(__x86_64__)
+#if defined(CONFIG_X86_64)
integer_param("xenheap_megabytes", opt_xenheap_megabytes);
#endif
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
-#if defined(__x86_64__)
+#if defined(CONFIG_X86_64)
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
#else
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
/* Set up and load the per-CPU TSS and LDT. */
t->bitmap = IOBMP_INVALID_OFFSET;
-#if defined(__i386__)
+#if defined(CONFIG_X86_32)
t->ss0 = __HYPERVISOR_DS;
t->esp0 = get_stack_bottom();
-#elif defined(__x86_64__)
+#elif defined(CONFIG_X86_64)
t->rsp0 = get_stack_bottom();
#endif
set_tss_desc(nr,t);
watchdog_enable();
-#ifdef __x86_64__ /* x86_32 uses low mappings when building DOM0. */
+#ifdef CONFIG_X86_64 /* x86_32 uses low mappings when building DOM0. */
zap_low_mappings();
#endif
}
for ( ; ; ) ;
}
- max_page = init_e820(e820_raw, e820_raw_nr);
+ max_page = init_e820(e820_raw, &e820_raw_nr);
/* Find the first high-memory RAM hole. */
for ( i = 0; i < e820.nr_map; i++ )
printk("Not enough memory to stash the DOM0 kernel image.\n");
for ( ; ; ) ;
}
-#if defined(__i386__)
+#if defined(CONFIG_X86_32)
memmove((void *)initial_images_start, /* use low mapping */
(void *)mod[0].mod_start, /* use low mapping */
mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
-#elif defined(__x86_64__)
+#elif defined(CONFIG_X86_64)
memmove(__va(initial_images_start),
__va(mod[0].mod_start),
mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
e820.map[i].addr + e820.map[i].size);
}
+#if defined(CONFIG_X86_64)
+ /* On x86/64 we can 1:1 map every registered memory area. */
+ /* We use the raw_e820 map because we sometimes truncate the cooked map. */
+ for ( i = 0; i < e820_raw_nr; i++ )
+ {
+ unsigned long min, sz;
+ min = (unsigned long)e820_raw[i].addr &
+ ~(((unsigned long)L1_PAGETABLE_ENTRIES << PAGE_SHIFT) - 1);
+ sz = ((unsigned long)e820_raw[i].size +
+ ((unsigned long)L1_PAGETABLE_ENTRIES << PAGE_SHIFT) - 1) &
+ ~(((unsigned long)L1_PAGETABLE_ENTRIES << PAGE_SHIFT) - 1);
+ map_pages(idle_pg_table, PAGE_OFFSET + min, min, sz, PAGE_HYPERVISOR);
+ }
+#endif
+
printk("System RAM: %luMB (%lukB)\n",
nr_pages >> (20 - PAGE_SHIFT),
nr_pages << (PAGE_SHIFT - 10));
map_pages(idle_pg_table, fix_to_virt(idx), p, PAGE_SIZE, flags);
}
-
void __init paging_init(void)
{
- unsigned long i, p, max;
+ unsigned long i, p;
l3_pgentry_t *l3rw, *l3ro;
struct pfn_info *pg;
- /* Map all of physical memory. */
- max = ((max_page + L1_PAGETABLE_ENTRIES - 1) &
- ~(L1_PAGETABLE_ENTRIES - 1)) << PAGE_SHIFT;
- map_pages(idle_pg_table, PAGE_OFFSET, 0, max, PAGE_HYPERVISOR);
-
/*
* Allocate and map the machine-to-phys table.
* This also ensures L3 is present for ioremap().